Fix x86/64 build: remove CONFIG_VMX conditionals so the VMX shadow-mode code (vmx_shadow_clear_state, gva_to_gpte, arch_vmx state) is compiled unconditionally.
Signed-off-by: Keir Fraser <keir.fraser@cl.cam.ac.uk>
free_shadow_page(d, &frame_table[smfn]);
}
-#ifdef CONFIG_VMX
+/*
+ * XXX KAF:
+ * 1. Why is this VMX specific?
+ * 2. Why is VMX using clear_state() rather than free_state()?
+ * (could we get rid of clear_state and fold into free_state?)
+ */
void vmx_shadow_clear_state(struct domain *d)
{
SH_VVLOG("vmx_clear_shadow_state:");
clear_shadow_state(d);
shadow_unlock(d);
}
-#endif
-
unsigned long shadow_l2_table(
struct domain *d, unsigned long gmfn)
ASSERT(shadow_mode_enabled(ed->domain));
+ /*
+ * XXX KAF: Why is this set-to-zero required?
+ * Why, on failure, must we bin all our shadow state?
+ */
if (__put_user(0L, (unsigned long *)
&shadow_linear_pg_table[va >> PAGE_SHIFT])) {
vmx_shadow_clear_state(ed->domain);
struct desc_struct fast_trap_desc;
#endif
trap_info_t traps[256];
-#ifdef CONFIG_VMX
- struct arch_vmx_struct arch_vmx; /* Virtual Machine Extensions */
-#endif
+
+ /* Virtual Machine Extensions */
+ struct arch_vmx_struct arch_vmx;
/*
* Every domain has a L1 pagetable of its own. Per-domain mappings
#include <xen/types.h>
#include <xen/perfc.h>
#include <asm/processor.h>
-
-#ifdef CONFIG_VMX
#include <asm/domain_page.h>
-#endif
/* Shadow PT flag bits in pfn_info */
#define PSH_shadowed (1<<31) /* page has a shadow. PFN points to shadow */
extern void free_shadow_state(struct domain *d);
extern void shadow_invlpg(struct exec_domain *, unsigned long);
-#ifdef CONFIG_VMX
extern void vmx_shadow_clear_state(struct domain *);
-#endif
#define __mfn_to_gpfn(_d, mfn) \
( (shadow_mode_translate(_d)) \
shadow_audit(d, 0);
}
-#ifdef CONFIG_VMX
-
static inline unsigned long gva_to_gpte(unsigned long gva)
{
unsigned long gpde, gpte, pfn, index;
return (gpte & PAGE_MASK) + (gva & ~PAGE_MASK);
}
-#endif /* CONFIG_VMX */
-
static inline void __update_pagetables(struct exec_domain *ed)
{
struct domain *d = ed->domain;